In [2]:
# Imports: third-party libraries used throughout the notebook.
import numpy as np
import os
import glob
import cv2

import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

# Project-local module with the camera calibration / warping helpers
# (undistort_img, warp_image, sobel_binary_img, color_thresh_img).
import transformation as trans

# Presumably performs the camera-calibration setup needed by
# trans.undistort_img below -- confirm against transformation.py.
trans.main()

%matplotlib inline
%load_ext autoreload
%autoreload 2
The autoreload extension is already loaded. To reload it, use:
  %reload_ext autoreload
In [3]:
#Load the images
test_image = glob.glob('./test_images/*.jpg')
test_out_dir = './output_images/'

Undistorted Test Image Visualization

In [4]:
# Undistort every test image, save the result, and show before/after pairs.
# (Removed an unused GridSpec and a stray plt.figure that were never drawn on.)
os.makedirs(test_out_dir + 'undistort_test', exist_ok=True)  # cv2.imwrite fails silently if the directory is missing

for idx, name in enumerate(test_image):
    test_img = cv2.imread(name)               # BGR, as loaded by OpenCV
    test_dst = trans.undistort_img(test_img)  # remove lens distortion

    # Save the undistorted image under output_images/undistort_test/.
    test_image_name = os.path.split(name)[1]
    write_name = test_out_dir + 'undistort_test/undistorted_' + test_image_name
    cv2.imwrite(write_name, test_dst)

    # Side-by-side comparison; convert BGR -> RGB for matplotlib display.
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    ax1.imshow(cv2.cvtColor(test_img, cv2.COLOR_BGR2RGB))
    ax1.set_title('Original Image', fontsize=30)
    ax2.imshow(cv2.cvtColor(test_dst, cv2.COLOR_BGR2RGB))
    ax2.set_title('Undistorted Image', fontsize=30)
<matplotlib.figure.Figure at 0x8259320>

Color & gradient threshold pipeline

In [5]:
# Color & gradient threshold visualization for one test image.
# (Removed an unused GridSpec and a stray plt.figure that were never drawn on.)
img = cv2.imread('test_images/test6.jpg')
img    = trans.undistort_img(img)
binary = trans.sobel_binary_img(img, 'x')   # gradient threshold along x

color_binary = trans.color_thresh_img(img)  # color-space threshold

# Stack each channel: red stays black, green = gradient, blue = color.
# Note color_binary[:, :, 0] is all 0s, effectively an all-black image. It might
# be beneficial to replace this channel with something else.
color_binary = np.dstack(( np.zeros_like(binary), binary, color_binary))

# Plot original vs. combined thresholded result.
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(color_binary, cmap='gray')
ax2.set_title('Binarize Image', fontsize=30)
Out[5]:
<matplotlib.text.Text at 0xa003b00>
<matplotlib.figure.Figure at 0x8d3e6d8>

Perspective Transform

In [6]:
#from transformation import warp_image

# Perspective ("bird's-eye") transform of an undistorted test image.
# (Removed unused locals size_top/size_bottom and commented-out dead code.)
image = mpimg.imread('test_images/test6.jpg')  # RGB, as loaded by matplotlib

undist = trans.undistort_img(image)
warped = trans.warp_image(undist)              # apply the perspective transform

f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
ax1.axis('off')
ax1.imshow(image)
ax1.set_title('Original Image', fontsize=50)
ax2.axis('off')
ax2.imshow(warped)
ax2.set_title('Undistorted and Warped Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

Calculate the Thresholded Binary Image

In [7]:
# Sobel-x binary threshold on the warped (bird's-eye) image.
# (Removed an unused GridSpec and an orphaned first plt.figure; the
# figsize=(10,4) figure below is the one actually drawn on.)
binary_warped = trans.sobel_binary_img(warped, 'x')

plt.figure(figsize=(10,4))
plt.subplot(1,2,1)
plt.imshow(warped,cmap='gray')
plt.title('original')
plt.subplot(1,2,2)
plt.imshow(binary_warped,cmap='gray') 
plt.title('Bird-eye view')
Out[7]:
<matplotlib.text.Text at 0xdadeef0>
<matplotlib.figure.Figure at 0x4c5e668>
In [8]:
# Draw the region-of-interest quadrilateral as a filled green polygon
# (bottom-left, top-left, top-right, bottom-right in pixel coordinates).
corner_points = [[380, 720], [600, 400], [800, 400], [1020, 720]]
pts = np.array(corner_points, np.int32).reshape((-1, 1, 2))

canvas = np.zeros_like(undist)
polyshape = cv2.fillPoly(canvas, np.int_([pts]), (0, 255, 0))
plt.imshow(cv2.cvtColor(polyshape, cv2.COLOR_BGR2RGB))
Out[8]:
<matplotlib.image.AxesImage at 0xfd519e8>
In [9]:
#polyshape = cv2.addWeighted( undist, 1, polyshape, 0.3, 0)
#plt.imshow(polyshape)

Histogram of the Lane-Line Pixels

In [10]:
%matplotlib inline
histogram = np.sum(binary_warped[binary_warped.shape[0]/2:,:], axis=0)
plt.plot(histogram)
C:\Program Files\Anaconda3\envs\self-driving\lib\site-packages\ipykernel\__main__.py:2: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
  from ipykernel import kernelapp as app
Out[10]:
[<matplotlib.lines.Line2D at 0x10d28400>]

Sliding Window and Fit Lanes

In [11]:
import Finding_Lanes as f_lane
In [12]:
# Initial lane fit via sliding-window search over the binary warped image;
# debug=True presumably renders the search visualization -- see Finding_Lanes.py.
ploty, left_fitx, right_fitx, left_fit, right_fit = f_lane.fit_polynomials(binary_warped, debug=True)
C:\self-driving-car\P4-CarND-Advanced-Lane-Lines\Finding_Lanes.py:16: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
  histogram = np.sum(binary_warped[binary_warped.shape[0]/2:,:], axis=0)
In [13]:
# Faster re-fit: searches around the previously fitted coefficients
# (left_fit / right_fit) instead of repeating the full sliding-window search.
ploty, left_fitx, right_fitx, left_fit, right_fit = f_lane.fast_fit_polynomials(binary_warped, left_fit,  right_fit, debug=True)

Pipeline for Video Rendering

In [14]:
def pipeline_process(image):
    """Full lane-detection pipeline for a single color video frame.

    Steps: undistort -> perspective warp -> binary threshold -> polynomial
    fit -> render lane overlay -> annotate curvature and vehicle offset.

    Parameters
    ----------
    image : ndarray
        Input color frame (as delivered by moviepy / mpimg.imread).

    Returns
    -------
    ndarray
        Annotated color frame of the same size as the input.
    """
    # Step 1: undistort the raw camera frame
    img_undistort = trans.undistort_img(image)

    # Step 2: perspective transform to a bird's-eye view.
    # Bug fix: the original warped the global `img` leaked from an earlier
    # notebook cell instead of the undistorted frame computed above.
    warped = trans.warp_image(img_undistort)

    # Step 3: binary lane detection (Sobel-x threshold)
    binary_warped = trans.sobel_binary_img(warped, 'x')

    # Step 4: fit left/right lane polynomials (sliding-window search)
    ploty, left_fitx, right_fitx, left_fit, right_fit = f_lane.fit_polynomials(binary_warped)

    # Step 5: draw the detected lane back onto the undistorted frame
    output_lane = f_lane.render_lane(img_undistort, ploty, left_fitx, right_fitx)

    # Step 6: annotate the radius of curvature (cv2.putText draws in place)
    curv = f_lane.get_curvature(ploty, left_fitx, right_fitx)
    output_curvature = cv2.putText(output_lane, "Curvature: " + str(int(curv)) + "m", (900, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, [0, 0, 0], 2)

    # Step 7: annotate the vehicle offset from lane center.
    # 640 px is the horizontal image center (1280-wide frame -- TODO confirm);
    # xm_per_pix converts pixels to meters (3.7 m lane over ~700 px).
    xm_per_pix = 3.7/700
    left_lane_pos = left_fitx[-1]    # lane x-positions at the bottom row
    right_lane_pos = right_fitx[-1]
    road_pos = (((left_lane_pos + right_lane_pos) / 2) - 640) * xm_per_pix
    output_road_pos = cv2.putText(output_lane, "Offset: {0:.2f}m".format(road_pos), (900, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, [0, 0, 0], 2)

    # output from processing step
    output_image = output_road_pos

    # The video writer requires color frames; promote grayscale if needed.
    if len(output_image.shape) == 2:
        return cv2.cvtColor(np.float32(output_image), cv2.COLOR_GRAY2RGB)
    else:
        return output_image
In [15]:
# Read in a thresholded image
image = mpimg.imread('test_images/test5.jpg')
img = pipeline_process(image)
cv2.imwrite('output_images/final_image.jpg', cv2.cvtColor(np.float32(img), cv2.COLOR_RGB2BGR))
plt.imshow(img)
C:\self-driving-car\P4-CarND-Advanced-Lane-Lines\Finding_Lanes.py:16: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
  histogram = np.sum(binary_warped[binary_warped.shape[0]/2:,:], axis=0)
Out[15]:
<matplotlib.image.AxesImage at 0x872fd68>
In [16]:
# Import everything needed to edit/save/watch video clips
import imageio
from moviepy.editor import VideoFileClip
from IPython.display import HTML
In [17]:
# Render the main project video frame-by-frame through the pipeline.
project_output = 'project_video_output.mp4'
clip = VideoFileClip("project_video.mp4");
white_clip = clip.fl_image(pipeline_process) #NOTE: this function expects color images!!
%time white_clip.write_videofile(project_output, audio=False);
[MoviePy] >>>> Building video project_video_output.mp4
[MoviePy] Writing video project_video_output.mp4
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▉| 1260/1261 [10:10<00:00,  3.55it/s]
[MoviePy] Done.
[MoviePy] >>>> Video ready: project_video_output.mp4 

Wall time: 10min 13s
In [ ]:
# Render the (harder) challenge video through the same pipeline.
challenge_output = 'challenge_video_output.mp4'
clip2 = VideoFileClip("challenge_video.mp4");
white_clip = clip2.fl_image(pipeline_process) #NOTE: this function expects color images!!
%time white_clip.write_videofile(challenge_output, audio=False);
In [ ]:
# Render the hardest challenge video through the same pipeline.
# NOTE: `clip2` is reused from the previous cell for a different video --
# rerun from the top if results look stale.
harder_challenge_output = 'harder_challenge_video_output.mp4'
clip2 = VideoFileClip("harder_challenge_video.mp4");
white_clip = clip2.fl_image(pipeline_process) #NOTE: this function expects color images!!
%time white_clip.write_videofile(harder_challenge_output, audio=False);
In [ ]: